rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED;
+/* Hash table mapping domain id -> task_struct. Entries are chained
+ * through p->next_hash and the table is protected by tasklist_lock.
+ * TASK_HASH_SIZE must remain a power of two so TASK_HASH can mask
+ * instead of dividing. */
+#define TASK_HASH_SIZE 256
+#define TASK_HASH(_id) ((_id)&(TASK_HASH_SIZE-1))
+static struct task_struct *task_hash[TASK_HASH_SIZE];
+
/*
* create a new domain
*/
p->max_pages = p->tot_pages = 0;
write_lock_irqsave(&tasklist_lock, flags);
SET_LINKS(p);
+ p->next_hash = task_hash[TASK_HASH(dom_id)];
+ task_hash[TASK_HASH(dom_id)] = p;
write_unlock_irqrestore(&tasklist_lock, flags);
return(p);
}
-/* Get a pointer to the specified domain. Consider replacing this
- * with a hash lookup later.
- *
- * Also, kill_other_domain should call this instead of scanning on its own.
- */
struct task_struct *find_domain_by_id(unsigned int dom)
{
- struct task_struct *p = &idle0_task;
-
- read_lock_irq(&tasklist_lock);
- do {
- if ( (p->domain == dom) ) {
- get_task_struct(p); /* increment the refcnt for caller */
- read_unlock_irq(&tasklist_lock);
- return (p);
+ struct task_struct *p;
+ unsigned long flags;
+
+ /* The hash chain is protected by tasklist_lock; take the irq-safe
+ * variant so lookup is valid from any context. */
+ read_lock_irqsave(&tasklist_lock, flags);
+ p = task_hash[TASK_HASH(dom)];
+ while ( p != NULL )
+ {
+ if ( p->domain == dom )
+ {
+ get_task_struct(p); /* refcnt held for caller; caller must release */
+ break;
}
- } while ( (p = p->next_task) != &idle0_task );
- read_unlock_irq(&tasklist_lock);
+ p = p->next_hash;
+ }
+ read_unlock_irqrestore(&tasklist_lock, flags);
- return 0;
+ /* p is NULL here if no domain with id 'dom' was found. */
+ return p;
}
/* Release resources belonging to task @p. */
void release_task(struct task_struct *p)
{
+ struct task_struct **pp;
+ unsigned long flags;
+
ASSERT(p->state == TASK_DYING);
ASSERT(!p->has_cpu);
printk("Releasing task %d\n", p->domain);
- write_lock_irq(&tasklist_lock);
+ write_lock_irqsave(&tasklist_lock, flags);
REMOVE_LINKS(p);
- write_unlock_irq(&tasklist_lock);
+ /* Unlink p from its hash chain: advance the link *pointer* until it
+ * addresses p, then splice p out. (Writing through *pp while
+ * searching would unlink every node ahead of p.) */
+ pp = &task_hash[TASK_HASH(p->domain)];
+ while ( *pp != p ) pp = &(*pp)->next_hash;
+ *pp = p->next_hash;
+ write_unlock_irqrestore(&tasklist_lock, flags);
/*
* This frees up blkdev rings. Totally safe since blkdev ref counting
unsigned long flags;
int i;
+ if ( vif == NULL )
+ return;
+
for ( i = 0; i < MAX_DOMAIN_VIFS; i++ )
if ( vif->domain->net_vif_list[i] == vif )
vif->domain->net_vif_list[i] = NULL;
*/
struct mm_struct *active_mm;
struct thread_struct thread;
- struct task_struct *prev_task, *next_task;
+ struct task_struct *prev_task, *next_task, *next_hash;
unsigned long flags;
typedef struct rx_shadow_entry_st
{
- /* IN vars */
unsigned short id;
+ /* Snapshot of the CPU's tlb_flush_count taken at ring-fill time;
+ * later compared (truncated to 16 bits) against the current count to
+ * decide whether a TLB flush is still required. */
+ unsigned short flush_count; /* 16 bits should be enough */
unsigned long pte_ptr;
unsigned long buf_pfn;
- /* PRIVATE vars */
- unsigned long flush_count;
} rx_shadow_entry_t;
typedef struct tx_shadow_entry_st
{
- /* IN vars */
unsigned short id;
+ /* NB: the per-entry 'status' field is gone; tx responses are now
+ * always completed with RING_STATUS_OK. */
+ unsigned short size;
void *header;
unsigned long payload;
- /* OUT vars */
- unsigned short size;
- unsigned char status;
} tx_shadow_entry_t;
typedef struct net_vif_st {
* network driver that called us should also have no nasty locks.
*/
rx = vif->rx_shadow_ring + vif->rx_cons;
- if ( rx->flush_count ==
+ if ( rx->flush_count == (unsigned short)
atomic_read(&tlb_flush_count[vif->domain->processor]) )
flush_tlb_cpu(vif->domain->processor);
tx = vif->tx_shadow_ring + vif->tx_cons;
vif->tx_cons = TX_RING_INC(vif->tx_cons);
- make_tx_response(vif, tx->id, tx->status);
+ make_tx_response(vif, tx->id, RING_STATUS_OK);
put_vif(vif);
}
if ( vif->tx_idx != vif->tx_prod )
add_to_net_schedule_list_tail(vif);
- ASSERT(tx->status == RING_STATUS_OK);
-
skb->destructor = tx_skb_release;
skb->head = skb->data = tx->header;
{
vif->tx_shadow_ring[j].id = tx.id;
vif->tx_shadow_ring[j].size = tx.size;
- vif->tx_shadow_ring[j].status = RING_STATUS_OK;
vif->tx_shadow_ring[j].header =
kmem_cache_alloc(net_header_cachep, GFP_KERNEL);
if ( vif->tx_shadow_ring[j].header == NULL )
vif->rx_shadow_ring[j].id = rx.id;
vif->rx_shadow_ring[j].pte_ptr = rx.addr;
vif->rx_shadow_ring[j].buf_pfn = buf_pfn;
- vif->rx_shadow_ring[j].flush_count =
+ vif->rx_shadow_ring[j].flush_count = (unsigned short)
atomic_read(&tlb_flush_count[smp_processor_id()]);
j = RX_RING_INC(j);